library(dada2)
packageVersion("dada2") # check dada2 version
## [1] '1.21.0'
library(Biostrings)
library(ShortRead)
library(seqTools) # per base sequence content
library(phyloseq)
library(ggplot2)
library(data.table)
library(plyr)
library(dplyr)
library(qckitfastq) # per base sequence content
library(stringr)
First, we import the fastq files containing the raw reads. The samples were downloaded from the ENA database with the accession number PRJNA566284.
# Save the path to the directory containing the gzipped fastq files
path <- "~/Projects/IBS_Meta-analysis_16S/data/analysis-individual/Zhu-2019"
# list.files(path) # check we are in the right directory
# fastq filenames have format: SAMPLENAME_1.fastq.gz (forward) / SAMPLENAME_2.fastq.gz (reverse)
# list.files() with full.names = TRUE returns the complete path to each file
fnFs <- sort(list.files(file.path(path, "original"), pattern="_1.fastq.gz", full.names = TRUE)) # FWD reads
fnRs <- sort(list.files(file.path(path, "original"), pattern="_2.fastq.gz", full.names = TRUE)) # REV reads
show(fnFs[1:5])
show(fnRs[1:5])
# Extract sample names (the SRA run accession): everything before the first "_"
sample.names <- sapply(strsplit(basename(fnFs), "_"), `[`, 1)
show(sample.names) # sample name only (no path, no suffix)
# Visually inspect read quality profiles (first 3 samples only; use
# seq_along(fnFs) instead of 1:3 to inspect all of them)
for (i in 1:3){ # 1:length(fnFs)
show(plotQualityProfile(fnFs[i]))
show(plotQualityProfile(fnRs[i]))
}
# Count the number of raw reads per sample.
# NOTE: this was previously commented out, but `raw_stats` is used later in the
# script (primer-hit report and saveRDS), so it must actually be computed here.
raw_stats <- data.frame('sample' = sample.names,
                        'reads' = fastqq(fnFs)@nReads)
min(raw_stats$reads)
max(raw_stats$reads)
mean(raw_stats$reads)
We will have a quick peek at the per base sequence content of the reads in some samples, to make sure there is no anomaly (i.e. all reads having the same sequence).
# Look at per base sequence content (forward read of sample 10)
fseqF1 <- seqTools::fastqq(fnFs[10])
## [fastqq] File ( 1/1) '/Users/scarcy/Projects/IBS_Meta-analysis_16S/data/analysis-individual/Zhu-2019/original/SRR10142835_1.fastq.gz' done.
rcF1 <- read_content(fseqF1) # qckitfastq: per-position base composition
plot_read_content(rcF1) + labs(title = "Per base sequence content - Forward read")
# Same check for the reverse read of the same sample
fseqR1 <- seqTools::fastqq(fnRs[10])
## [fastqq] File ( 1/1) '/Users/scarcy/Projects/IBS_Meta-analysis_16S/data/analysis-individual/Zhu-2019/original/SRR10142835_2.fastq.gz' done.
rcR1 <- read_content(fseqR1)
plot_read_content(rcR1) + labs(title = "Per base sequence content - Reverse read")
Now, we will look whether the reads still contain the primers. Primer sequences are given in the methods section of the paper.
# V4 region primers (IUPAC degenerate codes: M = A/C, H = A/C/T, V = A/C/G, W = A/T)
FWD <- "GTGCCAGCMGCCGCGGTAA" # U515 forward primer sequence
REV <- "GGACTACHVGGGTWTCTAAT" # E786 reverse primer sequence (in reality, it's the sequence of 5'-806-786-3')
# Return all orientations of a primer sequence as a named character vector:
# Forward, Complement, Reverse, and RevComp (reverse complement).
allOrients <- function(primer) {
  # Biostrings is attached at the top of the script; fail loudly if it is
  # missing instead of relying on require()'s silent FALSE return value.
  stopifnot(requireNamespace("Biostrings", quietly = TRUE))
  dna <- Biostrings::DNAString(primer) # Biostrings works w/ DNAString objects rather than character vectors
  orients <- c(Forward = dna,
               Complement = Biostrings::complement(dna),
               Reverse = Biostrings::reverse(dna),
               RevComp = Biostrings::reverseComplement(dna))
  # Convert back to character; vapply (unlike sapply) guarantees the return type
  vapply(orients, toString, character(1))
}
# Compute all orientations of both primer sequences
FWD.orients <- allOrients(FWD) # U515
REV.orients <- allOrients(REV) # E786
FWD.orients # sanity check
## Forward Complement Reverse
## "GTGCCAGCMGCCGCGGTAA" "CACGGTCGKCGGCGCCATT" "AATGGCGCCGMCGACCGTG"
## RevComp
## "TTACCGCGGCKGCTGGCAC"
REV.orients
## Forward Complement Reverse
## "GGACTACHVGGGTWTCTAAT" "CCTGATGDBCCCAWAGATTA" "TAATCTWTGGGVHCATCAGG"
## RevComp
## "ATTAGAWACCCBDGTAGTCC"
# Count the number of reads in fastq file `fn` that contain `primer`
# (IUPAC-aware matching via fixed = FALSE, allowing up to 2 mismatches).
primerHits <- function(primer, fn) {
  reads <- sread(readFastq(fn))
  hit.counts <- vcountPattern(primer, reads, fixed = FALSE, max.mismatch = 2)
  sum(hit.counts > 0)
}
# Report how many times the U515 and E786 primers (all orientations) are found
# in the forward/reverse reads of a few samples (6 to 9).
for (i in 6:9){
# Count reads directly from the fastq file: raw_stats is only computed in a
# chunk that was left commented out above, so do not depend on it here.
n.reads <- length(readFastq(fnFs[[i]]))
cat("SAMPLE", sample.names[i], "with total number of", n.reads, "reads\n\n")
x <- rbind(ForwardRead.FWDPrimer = sapply(FWD.orients, primerHits, fn = fnFs[[i]]),
ForwardRead.REVPrimer = sapply(REV.orients, primerHits, fn = fnFs[[i]]),
ReverseRead.FWDPrimer = sapply(FWD.orients, primerHits, fn = fnRs[[i]]),
ReverseRead.REVPrimer = sapply(REV.orients, primerHits, fn = fnRs[[i]]))
print(x)
cat("\n____________________________________________\n\n")
}
## SAMPLE SRR10142831 with total number of 54075 reads
##
## Forward Complement Reverse RevComp
## ForwardRead.FWDPrimer 53518 18 17 17
## ForwardRead.REVPrimer 17 18 18 16
## ReverseRead.FWDPrimer 20 19 19 27
## ReverseRead.REVPrimer 21 19 19 19
##
## ____________________________________________
##
## SAMPLE SRR10142832 with total number of 51364 reads
##
## Forward Complement Reverse RevComp
## ForwardRead.FWDPrimer 50791 13 13 13
## ForwardRead.REVPrimer 13 14 13 13
## ReverseRead.FWDPrimer 15 17 14 17
## ReverseRead.REVPrimer 15 15 14 15
##
## ____________________________________________
##
## SAMPLE SRR10142833 with total number of 32638 reads
##
## Forward Complement Reverse RevComp
## ForwardRead.FWDPrimer 32569 0 0 0
## ForwardRead.REVPrimer 0 0 0 0
## ReverseRead.FWDPrimer 0 0 0 28114
## ReverseRead.REVPrimer 21 0 0 0
##
## ____________________________________________
##
## SAMPLE SRR10142834 with total number of 38514 reads
##
## Forward Complement Reverse RevComp
## ForwardRead.FWDPrimer 38442 0 0 0
## ForwardRead.REVPrimer 0 0 0 0
## ReverseRead.FWDPrimer 0 0 0 34209
## ReverseRead.REVPrimer 34 0 0 0
##
## ____________________________________________
The forward reads indeed contain the forward primer (in the middle of the reads), and the reverse reads contain the revcomp of the forward primer (which makes sense, it shows the forward primer is found at the end of the reverse reads). Ideally, to follow the standardized pipeline, we should (1) filter out reads not containing any primer and (2) trim the primers. However, the fastq files deposited on the SRA/ENA databases do not contain Illumina headers. Thus, during the merging step of paired reads later on, the Dada2 package won’t be able to match paired-reads.
Instead, we will simply perform a quality filtering of the reads, without attempting primer removal.
# Place filtered files in a filtered2/ subdirectory
FWD.filt2_samples <- file.path(path, "filtered2", paste0(sample.names, "_1_filt.fastq.gz")) # FWD reads
REV.filt2_samples <- file.path(path, "filtered2", paste0(sample.names, "_2_filt.fastq.gz")) # REV reads
# Assign names for the filtered fastq.gz files
names(FWD.filt2_samples) <- sample.names
names(REV.filt2_samples) <- sample.names
# Quality filtering (no fixed truncation length; primers are not removed here, see above)
out2 <- filterAndTrim(fwd = fnFs, filt = FWD.filt2_samples,
rev = fnRs, filt.rev = REV.filt2_samples,
maxEE=3, # reads with more than 3 expected errors (sum(10^(-Q/10))) are discarded
truncQ=10, # Truncate reads at the first instance of a quality score less than or equal to truncQ.
minLen = 150, # Discard reads shorter than 150 bp. This is done after trimming and truncation.
compress=TRUE, multithread = TRUE, verbose=TRUE)
Let’s look at the output filtered fastq files as sanity check.
out2[1:6,] # show how many reads were kept in each of the first 6 files
## reads.in reads.out
## SRR10142826_1.fastq.gz 51061 48240
## SRR10142827_1.fastq.gz 55164 52238
## SRR10142828_1.fastq.gz 53155 50320
## SRR10142829_1.fastq.gz 51938 48538
## SRR10142830_1.fastq.gz 52139 49449
## SRR10142831_1.fastq.gz 54075 51228
# Look at the quality profile of the first 3 filtered samples
for (i in 1:3){
show(plotQualityProfile(FWD.filt2_samples[i]))
show(plotQualityProfile(REV.filt2_samples[i]))
}
Now we will build the parametric error model, to be able to infer amplicon sequence variants (ASVs) later on.
# Learn the error rates from the filtered reads (randomize = TRUE shuffles
# the samples used for the learning subset)
errF <- learnErrors(FWD.filt2_samples, multithread=TRUE, randomize=TRUE, verbose = 1)
errR <- learnErrors(REV.filt2_samples, multithread=TRUE, randomize=TRUE, verbose = 1)
The error rates for each possible transition (A→C, A→G, …) are shown. Points are the observed error rates for each consensus quality score. The black line shows the estimated error rates after convergence of the machine-learning algorithm. The red line shows the error rates expected under the nominal definition of the Q-score. Here the estimated error rates (black line) are a good fit to the observed rates (points), and the error rates drop with increased quality as expected.
# Sanity check: the estimated error rates (black line) should fit the observed points
plotErrors(errF, nominalQ = TRUE) # Forward reads
plotErrors(errR, nominalQ = TRUE) # Reverse reads
The dada() algorithm infers sequence variants based on estimated errors (previous step). Firstly, we de-replicate the reads in each sample, to reduce the computation time. De-replication is a common step in almost all modern ASV inference (or OTU picking) pipelines, but a unique feature of derepFastq is that it maintains a summary of the quality information for each dereplicated sequence in $quals.
# Dereplicate the reads in each sample (collapses identical reads, keeping quality summaries)
derepF <- derepFastq(FWD.filt2_samples) # forward
derepR <- derepFastq(REV.filt2_samples) # reverse
# Infer sequence variants using the learned error models
dadaFs <- dada(derepF, err=errF, multithread=TRUE) # forward
dadaRs <- dada(derepR, err=errR, multithread=TRUE) # reverse
# Inspect the inferred sequence variants for the first 3 samples
for (i in 1:3){
print(dadaFs[[i]])
print(dadaRs[[i]])
print("________________")
}
## dada-class: object describing DADA2 denoising results
## 172 sequence variants were inferred from 15778 input unique sequences.
## Key parameters: OMEGA_A = 1e-40, OMEGA_C = 1e-40, BAND_SIZE = 16
## dada-class: object describing DADA2 denoising results
## 197 sequence variants were inferred from 10173 input unique sequences.
## Key parameters: OMEGA_A = 1e-40, OMEGA_C = 1e-40, BAND_SIZE = 16
## [1] "________________"
## dada-class: object describing DADA2 denoising results
## 224 sequence variants were inferred from 16783 input unique sequences.
## Key parameters: OMEGA_A = 1e-40, OMEGA_C = 1e-40, BAND_SIZE = 16
## dada-class: object describing DADA2 denoising results
## 230 sequence variants were inferred from 11494 input unique sequences.
## Key parameters: OMEGA_A = 1e-40, OMEGA_C = 1e-40, BAND_SIZE = 16
## [1] "________________"
## dada-class: object describing DADA2 denoising results
## 218 sequence variants were inferred from 18811 input unique sequences.
## Key parameters: OMEGA_A = 1e-40, OMEGA_C = 1e-40, BAND_SIZE = 16
## dada-class: object describing DADA2 denoising results
## 251 sequence variants were inferred from 12751 input unique sequences.
## Key parameters: OMEGA_A = 1e-40, OMEGA_C = 1e-40, BAND_SIZE = 16
## [1] "________________"
We now need to merge paired reads.
# Merge the denoised forward and reverse reads (requires sufficient overlap)
mergers <- mergePairs(dadaFs, derepF, dadaRs, derepR, verbose=TRUE)
head(mergers[[1]])
We can now construct an amplicon sequence variant table (ASV) table, a higher-resolution version of the OTU table produced by traditional methods.
# Make the sequence table (samples x ASVs) from the inferred sequence variants
seqtable <- makeSequenceTable(mergers)
# We should have 29 samples (29 rows)
dim(seqtable)
## [1] 29 5448
# Inspect distribution of sequence lengths
hist(nchar(getSequences(seqtable)), breaks = 100, xlab = "ASV length", ylab = "Number of ASVs", main="")
The V4 region should be ~250bp, and our ASVs are rather ~400bp long. Considering that the forward primer was found in the middle of the forward reads, it is possible that we have much longer ASVs than we should (pre-V4 region + V4 region). Let’s try trimming the ASVs until the end of the forward primer.
# Get the ASVs
asvs <- getSequences(seqtable)
# Trim the ASVs when they contain the forward primer: remove everything up to
# and including the primer. FWD primer: GTGCCAGCMGCCGCGGTAA (M: A or C),
# expressed as the character class [AC] so a single pattern covers both bases.
new.asvs <- sub(".*GTGCCAGC[AC]GCCGCGGTAA", "", getSequences(seqtable))
length(new.asvs) == length(asvs) # sanity check
## [1] TRUE
# Let's check the length of our new asvs
hist(nchar(new.asvs), breaks = 100, xlab = "ASV length", ylab = "Number of ASVs", main="")
# Put these new ASVs in a separate seqtable
seqtable.new <- seqtable
colnames(seqtable.new) <- new.asvs
# Sanity check (each post-cut ASV should be a substring of its pre-cut ASV)
# for (i in seq_along(asvs)){
#   test <- str_detect(string = getSequences(seqtable)[i],
#                      pattern = getSequences(seqtable.new)[i])
#   if(test==FALSE){print('FALSE')}
# }
# Is there any duplicate ASVs now?
table(duplicated(new.asvs))
##
## FALSE TRUE
## 1706 3742
table(duplicated(asvs))
##
## FALSE
## 5448
# Create table to have corresponding ASVs pre/post-cut (to compare taxonomy assignment later on)
asv.df <- data.frame("asv" = paste0("ASV", seq_along(asvs)),
                     "pre_cut" = asvs,
                     "post_cut" = new.asvs)
# Merge columns sharing the same post-cut ASV by summing their counts.
# vapply (not sapply) pins the return type, and drop = FALSE keeps the
# single-column case a matrix so rowSums() needs no data.frame coercion.
seqtable.small <- vapply(unique(colnames(seqtable.new)),
                         function(s) as.integer(rowSums(seqtable.new[, colnames(seqtable.new) == s, drop = FALSE])),
                         integer(nrow(seqtable.new)))
rownames(seqtable.small) <- rownames(seqtable.new) # add sample names as rownames
# Sanity checks: no counts should have been lost by merging duplicate columns
ncol(seqtable.small) == length(unique(new.asvs))
## [1] TRUE
rowSums(seqtable.new) == rowSums(seqtable.small)
## SRR10142826 SRR10142827 SRR10142828 SRR10142829 SRR10142830 SRR10142831
## TRUE TRUE TRUE TRUE TRUE TRUE
## SRR10142832 SRR10142833 SRR10142834 SRR10142835 SRR10142836 SRR10142837
## TRUE TRUE TRUE TRUE TRUE TRUE
## SRR10142838 SRR10142839 SRR10142840 SRR10142841 SRR10142842 SRR10142843
## TRUE TRUE TRUE TRUE TRUE TRUE
## SRR10142844 SRR10142845 SRR10142846 SRR10142847 SRR10142848 SRR10142849
## TRUE TRUE TRUE TRUE TRUE TRUE
## SRR10142850 SRR10142851 SRR10142852 SRR10142853 SRR10142854
## TRUE TRUE TRUE TRUE TRUE
sum(seqtable.new) == sum(seqtable.small)
## [1] TRUE
# Check the ASVs are same sequence length, but we should have less of them
hist(nchar(getSequences(seqtable.small)), breaks = 100, xlab = "ASV length", ylab = "Number of ASVs", main="")
# Sequences should be between 515F - 806R, so around ~250bp (removing the primers lengths).
# Remove any sequence variant outside 200:300bp
seqtable.small.new <- seqtable.small[,nchar(colnames(seqtable.small)) %in% 200:300]
dim(seqtable.small.new)
## [1] 29 1686
hist(nchar(getSequences(seqtable.small.new)), breaks = 100, xlab = "ASV length", ylab = "Number of ASVs", main="")
# check we haven't lost too many counts (fraction of reads kept)
sum(seqtable.small.new)/sum(seqtable.small)
## [1] 0.9996091
The core dada method corrects substitution and indel errors, but chimeras remain. Fortunately, the accuracy of sequence variants after denoising makes identifying chimeric ASVs simpler than when dealing with fuzzy OTUs. Chimeric sequences are identified if they can be exactly reconstructed by combining a left-segment and a right-segment from two more abundant “parent” sequences.
# Remove chimeric ASVs by consensus across samples
seqtable.nochim <- removeBimeraDenovo(seqtable, method="consensus", multithread=TRUE, verbose=TRUE)
# Check how many sequence variants we have after removing chimeras
dim(seqtable.nochim)
## [1] 29 1070
# Check how many reads we have after removing chimeras (we should keep the vast majority of the reads, like > 80%)
sum(seqtable.nochim)/sum(seqtable)
## [1] 0.8905331
# Same for the table with ASVs where the FWD primer was cut
seqtable.small.nochim <- removeBimeraDenovo(seqtable.small.new, method="consensus", multithread=TRUE, verbose=TRUE)
dim(seqtable.small.nochim)
## [1] 29 681
sum(seqtable.small.nochim)/sum(seqtable.small.new)
## [1] 0.9477519
Sanity check before assigning taxonomy.
# Helper: total number of reads represented in a dada2 object (dada result, merger, ...)
getN <- function(x) sum(getUniques(x))
# Track reads through the pipeline: input, quality-filtered, denoised (F/R),
# merged, non-chimeric, and the % of input reads surviving to the end.
# vapply pins the return type of each column; as.integer() on the whole vector
# replaces the original lapply() call, which silently produced a list column
# inside the cbind()'d matrix.
track <- cbind(out2,
               vapply(dadaFs, getN, numeric(1)),
               vapply(dadaRs, getN, numeric(1)),
               vapply(mergers, getN, numeric(1)),
               rowSums(seqtable.nochim),
               as.integer(rowSums(seqtable.nochim) * 100 / out2[, 1]))
# Assign column and row names
colnames(track) <- c("input", "quality-filt", "denoisedF", "denoisedR", 'merged', 'nonchim', "%input->output")
rownames(track) <- sample.names
# Show final table: for each row/sample, the initial number of reads, filtered reads, denoised reads, and non chimera reads
track
## input quality-filt denoisedF denoisedR merged nonchim
## SRR10142826 51061 48240 47595 42999 25765 25125
## SRR10142827 55164 52238 51737 46581 36647 36052
## SRR10142828 53155 50320 49749 44659 40497 37561
## SRR10142829 51938 48538 48260 44164 13490 11821
## SRR10142830 52139 49449 49033 44509 34429 31700
## SRR10142831 54075 51228 50808 46246 41560 38658
## SRR10142832 51364 48822 48369 43474 42523 37465
## SRR10142833 32638 21143 19973 11402 14800 12223
## SRR10142834 38514 25140 24621 15301 20570 16896
## SRR10142835 42417 26199 24779 13185 17047 13929
## SRR10142836 45595 28699 26538 15295 16627 13025
## SRR10142837 35822 21822 20798 10934 15913 14533
## SRR10142838 34463 21734 20376 11597 14325 10583
## SRR10142839 41761 27285 26348 14927 23135 17393
## SRR10142840 32501 19495 17893 9839 9703 8594
## SRR10142841 22694 14641 14105 7748 10540 8653
## SRR10142842 50554 48003 47563 42883 41882 36671
## SRR10142843 21956 14145 13163 7622 9666 7818
## SRR10142844 35708 23147 22147 13113 15689 12396
## SRR10142845 29069 19484 19012 10463 14959 13152
## SRR10142846 44981 28703 28205 18500 20603 14286
## SRR10142847 33463 21424 19670 10964 11471 9524
## SRR10142848 51762 49268 48935 44904 41034 39115
## SRR10142849 54523 51877 51327 45396 45133 41114
## SRR10142850 52637 50266 49777 44876 43934 37264
## SRR10142851 54179 51760 51279 46334 45154 41162
## SRR10142852 54060 51373 50991 46228 43377 40787
## SRR10142853 54531 51848 51214 46537 43029 40390
## SRR10142854 54434 51456 51084 46085 42072 40595
## %input->output
## SRR10142826 49
## SRR10142827 65
## SRR10142828 70
## SRR10142829 22
## SRR10142830 60
## SRR10142831 71
## SRR10142832 72
## SRR10142833 37
## SRR10142834 43
## SRR10142835 32
## SRR10142836 28
## SRR10142837 40
## SRR10142838 30
## SRR10142839 41
## SRR10142840 26
## SRR10142841 38
## SRR10142842 72
## SRR10142843 35
## SRR10142844 34
## SRR10142845 45
## SRR10142846 31
## SRR10142847 28
## SRR10142848 75
## SRR10142849 75
## SRR10142850 70
## SRR10142851 75
## SRR10142852 75
## SRR10142853 74
## SRR10142854 74
Extensions: The dada2 package also implements a method to make species level assignments based on exact matching between ASVs and sequenced reference strains. Recent analysis suggests that exact matching (or 100% identity) is the only appropriate way to assign species to 16S gene fragments. Currently, species-assignment training fastas are available for the Silva and RDP 16S databases. To follow the optional species addition step, download the silva_species_assignment_v138.fa.gz file (the version used in the code below), and place it in the directory with the fastq files.
# Assign taxonomy down to genus level (with silva v138)
taxa <- assignTaxonomy(seqtable.nochim, "~/Projects/IBS_Meta-analysis_16S/data/silva_nr_v138_train_set.fa.gz",
tryRC = TRUE, # try reverse complement of the sequences
multithread=TRUE, verbose = TRUE)
# Add species assignment (exact matching of ASVs against reference strains)
taxa <- addSpecies(taxa, "~/Projects/IBS_Meta-analysis_16S/data/silva_species_assignment_v138.fa.gz")
# Same with the optimized (primer-cut) seqtable
taxa.small <- assignTaxonomy(seqtable.small.nochim, "~/Projects/IBS_Meta-analysis_16S/data/silva_nr_v138_train_set.fa.gz",
tryRC = TRUE, # try reverse complement of the sequences
multithread=TRUE, verbose = TRUE)
taxa.small <- addSpecies(taxa.small, "~/Projects/IBS_Meta-analysis_16S/data/silva_species_assignment_v138.fa.gz")
# Check how the taxonomy table looks like
taxa.print <- taxa
rownames(taxa.print) <- NULL # Removing sequence rownames for display only
head(taxa.print)
## Kingdom Phylum Class
## [1,] "Bacteria" "Firmicutes" "Clostridia"
## [2,] "Bacteria" "Firmicutes" "Clostridia"
## [3,] "Bacteria" "Firmicutes" "Clostridia"
## [4,] "Bacteria" "Firmicutes" "Clostridia"
## [5,] "Bacteria" "Firmicutes" "Clostridia"
## [6,] "Bacteria" "Bacteroidota" "Bacteroidia"
## Order Family
## [1,] "Peptostreptococcales-Tissierellales" "Peptostreptococcaceae"
## [2,] "Lachnospirales" "Lachnospiraceae"
## [3,] "Clostridiales" "Clostridiaceae"
## [4,] "Peptostreptococcales-Tissierellales" "Peptostreptococcaceae"
## [5,] "Lachnospirales" "Lachnospiraceae"
## [6,] "Bacteroidales" "Bacteroidaceae"
## Genus Species
## [1,] "Intestinibacter" "bartlettii"
## [2,] "Blautia" NA
## [3,] "Clostridium_sensu_stricto_1" NA
## [4,] "Romboutsia" NA
## [5,] "Lachnoclostridium" NA
## [6,] "Bacteroides" "plebeius"
table(taxa.print[,1]) # Show the different kingdoms (should be only bacteria)
##
## Bacteria Eukaryota
## 1055 15
taxa.print[taxa.print[,1] == 'Eukaryota',2] # the eukaryota are all NAs
## [1] NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA
table(taxa.print[,2]) # Show the different phyla
##
## Actinobacteriota Bacteroidota Cyanobacteria Deinococcota
## 57 334 1 1
## Desulfobacterota Firmicutes Fusobacteriota Patescibacteria
## 2 557 11 2
## Proteobacteria Synergistota Verrucomicrobiota
## 82 1 3
table(is.na(taxa.print[,2])) # is there any NA phyla?
##
## FALSE TRUE
## 1051 19
# Check how the taxonomy table looks like
taxa.small.print <- taxa.small
rownames(taxa.small.print) <- NULL # Removing sequence rownames for display only
head(taxa.small.print)
## Kingdom Phylum Class
## [1,] "Bacteria" "Firmicutes" "Clostridia"
## [2,] "Bacteria" "Firmicutes" "Clostridia"
## [3,] "Bacteria" "Firmicutes" "Clostridia"
## [4,] "Bacteria" "Firmicutes" "Clostridia"
## [5,] "Bacteria" "Firmicutes" "Clostridia"
## [6,] "Bacteria" "Bacteroidota" "Bacteroidia"
## Order Family
## [1,] "Peptostreptococcales-Tissierellales" "Peptostreptococcaceae"
## [2,] "Lachnospirales" "Lachnospiraceae"
## [3,] "Clostridiales" "Clostridiaceae"
## [4,] "Peptostreptococcales-Tissierellales" "Peptostreptococcaceae"
## [5,] "Lachnospirales" "Lachnospiraceae"
## [6,] "Bacteroidales" "Bacteroidaceae"
## Genus Species
## [1,] "Intestinibacter" "bartlettii"
## [2,] "Blautia" NA
## [3,] "Clostridium_sensu_stricto_1" NA
## [4,] "Romboutsia" NA
## [5,] NA NA
## [6,] "Bacteroides" "plebeius"
table(taxa.small.print[,1]) # Show the different kingdoms (should be only bacteria)
##
## Bacteria
## 681
table(taxa.small.print[,2]) # Show the different phyla
##
## Actinobacteriota Bacteroidota Cyanobacteria Deinococcota
## 34 161 1 1
## Desulfobacterota Firmicutes Fusobacteriota Proteobacteria
## 2 432 8 38
## Synergistota Verrucomicrobiota
## 1 2
table(is.na(taxa.small.print[,2])) # is there any NA phyla?
##
## FALSE TRUE
## 680 1
# Convert taxonomy matrices to data frames (rownames are the ASV sequences)
taxa.df <- data.frame(taxa)
taxa.df$asv <- rownames(taxa.df)
taxa.small.df <- data.frame(taxa.small)
taxa.small.df$asv <- rownames(taxa.small.df)
# Remove chimeras from the ASV pre/post cut dataframe (keep pre-cut ASVs that survived chimera removal)
asv.df.nochim <- asv.df[asv.df$pre_cut %in% rownames(taxa.df),]
colnames(asv.df.nochim) <- c("asvID", "asv_precut", "asv_postcut")
# Sanity checks
# table(duplicated(asv.df.nochim$post_cut))
# dim(asv.df.nochim)
# table(asv.df.nochim$pre_cut %in% rownames(taxa))
# table(asv.df.nochim$post_cut %in% rownames(taxa.small)) # 59 ASVs post-cut are not in the taxonomic table (probably because they are chimeras)
# Compare taxonomic assignment pre/post-cut (phylum level)
compare.df <- merge(asv.df.nochim, taxa.df[,c("Phylum", "asv")], by.x="asv_precut", by.y="asv") # pre-cut phylum
colnames(compare.df)[4] <- "Phylum_precut"
compare.df <- merge(x=compare.df, y=taxa.small.df[,c("Phylum", "asv")], by.x="asv_postcut", by.y="asv") # post-cut phylum
colnames(compare.df)[5] <- "Phylum_postcut"
# The 59 ASVs present in pre-cut but not in post-cut were removed (59 rows were deleted by the inner merge)
dim(compare.df)
## [1] 1011 5
# How many rows (ASVs) have a different phylum pre- vs post-cut?
table(compare.df$Phylum_precut != compare.df$Phylum_postcut, useNA="ifany") # 29 ASVs assigned to different phyla
##
## FALSE TRUE <NA>
## 978 29 4
# Long-format table: one row per (ASV, pre/post-cut) with its phylum
phylum.all <- compare.df %>%
select(asvID, Phylum_precut, Phylum_postcut) %>%
rename(precut=Phylum_precut, postcut=Phylum_postcut) %>%
mutate(diff=ifelse(precut != postcut, TRUE, FALSE)) %>%
tidyr::pivot_longer(!c(asvID, diff), names_to="cut", values_to="Phylum")
# Alluvial plot: flows colored red when the phylum changed between pre- and post-cut
library(ggalluvial)
ggplot(phylum.all,
aes(x = cut, stratum = Phylum, alluvium = asvID,
label = Phylum)) +
geom_flow(aes(color=diff),stat = "alluvium", lode.guidance = "frontback") +
geom_stratum(aes(fill=Phylum)) +
scale_color_manual(values=c("lightgrey", "#de2d26", "lightgrey"))+
scale_fill_brewer(type = "qual", palette = "Set3") +
theme_bw()+
labs(x="", y="#ASVs", title="ASV taxonomic assignment (with/without cutting)", color='Different?')
# ggsave("~/Projects/IBS_Meta-analysis_16S/data/analysis-individual/Zhu-2019/01_Dada2-Zhu/pre-post-cut_taxAssignment_phylum.jpg", width=10, height=6)
# *********************
# SAME COMPARISON AT GENUS LEVEL
compare.df.genus <- merge(asv.df.nochim, taxa.df[,c("Genus", "asv")], by.x="asv_precut", by.y="asv") # pre-cut genus
colnames(compare.df.genus)[4] <- "Genus_precut"
compare.df.genus <- merge(x=compare.df.genus, y=taxa.small.df[,c("Genus", "asv")], by.x="asv_postcut", by.y="asv") # post-cut genus
colnames(compare.df.genus)[5] <- "Genus_postcut"
dim(compare.df.genus)
## [1] 1011 5
# How many rows (ASVs) have a different genus pre- vs post-cut?
table(compare.df.genus$Genus_precut != compare.df.genus$Genus_postcut, useNA="ifany") # 22 ASVs assigned to diff. genus, but many more NAs
##
## FALSE TRUE <NA>
## 782 22 207
# Long-format table: one row per (ASV, pre/post-cut) with its genus
genus.all <- compare.df.genus %>%
select(asvID, Genus_precut, Genus_postcut) %>%
rename(precut=Genus_precut, postcut=Genus_postcut) %>%
mutate(diff=ifelse(precut != postcut, TRUE, FALSE)) %>%
tidyr::pivot_longer(!c(asvID, diff), names_to="cut", values_to="Genus")
# Alluvial plot (genus level); legend shrunk because of the many genera
ggplot(genus.all,
aes(x = cut, stratum = Genus, alluvium = asvID,
label = Genus)) +
geom_flow(aes(color=diff),stat = "alluvium", lode.guidance = "frontback") +
geom_stratum(aes(fill=Genus)) +
scale_color_manual(values=c("lightgrey", "#de2d26", "lightgrey"))+
theme_bw()+
theme(legend.text = element_text(size=3),
legend.key.size = unit(0.1, "cm"))+
labs(x="", y="#ASVs", title="ASV taxonomic assignment (with/without cutting)", color='Different?')+
guides(fill=guide_legend(ncol=3))
# ggsave("~/Projects/IBS_Meta-analysis_16S/data/analysis-individual/Zhu-2019/01_Dada2-Zhu/pre-post-cut_taxAssignment_genus.jpg", width=10, height=6)
We will remove any sample with less than 500 reads from further analysis, and also any ASVs with unassigned phyla.
The preprocessing will be easier to do with ASV, taxonomic and metadata tables combined in a phyloseq object.
#_________________________
# Import metadata (SRA run table, comma-separated)
metadata_table <- read.table("~/Projects/IBS_Meta-analysis_16S/data/analysis-individual/Zhu-2019/ZhuSraRunTable.txt", header = TRUE, sep = ",")
colnames(metadata_table)
# Keep relevant data
# NOTE(review): Sample.Name is mapped to host_disease because the sample names
# encode the disease status via their prefix ("N_" = healthy, "F_" = IBS; see below)
sampledf <- metadata_table[, c("Run", "Sample.Name", "Age", "sex")]
rownames(sampledf) <- paste(metadata_table$Run) # have the same sample.names as the ASV table
colnames(sampledf) <- c("Run", "host_disease", "host_age", "host_sex")
# Simplify data
sampledf$host_disease <- as.character(sampledf$host_disease)
sampledf$host_disease[substr(sampledf$host_disease, start = 1, stop = 2) == "N_"] <- "Healthy" # Replace with better names
sampledf$host_disease[substr(sampledf$host_disease, start = 1, stop = 2) == "F_"] <- "IBS" # Replace with better names
sampledf[sampledf$host_disease == "Healthy", "host_subtype"] <- "HC"
sampledf[sampledf$host_disease == "IBS", "host_subtype"] <- "IBS-unspecified"
sampledf$Collection <- "1st" # only 1 collection point per individual
sampledf$sample_type <- "stool"
sampledf$author <- "Zhu"
sampledf$sequencing_tech <- "Illumina paired-end"
sampledf$variable_region <- "V4"
#_________________________
# Create phyloseq objects (ASV table + sample metadata + taxonomy)
physeq <- phyloseq(otu_table(seqtable.nochim, taxa_are_rows=FALSE), # by default, in otu_table the sequence variants are in rows
sample_data(sampledf),
tax_table(taxa))
physeq.small <- phyloseq(otu_table(seqtable.small.nochim, taxa_are_rows=FALSE),
sample_data(sampledf),
tax_table(taxa.small))
# Remove taxa that are eukaryota, or have unassigned Phyla
physeq <- subset_taxa(physeq, Kingdom != "Eukaryota")
physeq <- subset_taxa(physeq, !is.na(Phylum))
# Remove samples with less than 500 reads
physeq <- prune_samples(sample_sums(physeq)>=500, physeq)
# No sample was deleted, so we don't need to remove taxa present in low-count samples
# physeq <- prune_taxa(taxa_sums(physeq)>0, physeq)
# Same cleanup for the short-ASV object: remove eukaryota / unassigned phyla
physeq.small <- subset_taxa(physeq.small, Kingdom != "Eukaryota")
physeq.small <- subset_taxa(physeq.small, !is.na(Phylum))
# Remove samples with less than 500 reads
physeq.small <- prune_samples(sample_sums(physeq.small)>=500, physeq.small)
# Save intermediate objects and final tables to disk
# raw_stats is only defined when the raw read-count chunk above was actually
# run (it was left commented out), so guard the save to avoid an error here.
if (exists("raw_stats")) saveRDS(raw_stats, file.path(path, "01_Dada2-Zhu/raw_stats.rds"))
saveRDS(out2, file.path(path, "01_Dada2-Zhu/out2.rds"))
saveRDS(errF, file.path(path, "01_Dada2-Zhu/errF.rds"))
saveRDS(errR, file.path(path, "01_Dada2-Zhu/errR.rds"))
saveRDS(dadaFs, file.path(path, "01_Dada2-Zhu/infered_seq_F.rds"))
saveRDS(dadaRs, file.path(path, "01_Dada2-Zhu/infered_seq_R.rds"))
saveRDS(mergers, file.path(path, "01_Dada2-Zhu/mergers.rds"))
saveRDS(otu_table(physeq), file.path(path, "01_Dada2-Zhu/ASVtable_final.rds")) # ASV table
saveRDS(tax_table(physeq), file.path(path, "01_Dada2-Zhu/taxa_final.rds")) # taxa table
saveRDS(sample_data(physeq), file.path(path, "01_Dada2-Zhu/metadata_final.rds")) # metadata table
write.csv(sample_data(physeq), file.path(path, "01_Dada2-Zhu/ZhuMetadata.csv"))
# Taxa & Phyloseq object
saveRDS(taxa, "~/Projects/IBS_Meta-analysis_16S/data/analysis-individual/CLUSTER/Taxonomy/output/taxa_zhu.rds")
saveRDS(taxa.small, "~/Projects/IBS_Meta-analysis_16S/data/analysis-individual/CLUSTER/Taxonomy/output/taxa_zhu_shortASV.rds")
saveRDS(physeq, "~/Projects/IBS_Meta-analysis_16S/data/analysis-individual/CLUSTER/PhyloTree/input/physeq_zhu.rds")
saveRDS(physeq.small, file.path(path, "01_Dada2-Zhu/physeq_zhu_shortASV.rds")) # phyloseq object with shorter ASVs
# Absolute abundance per sample, colored by phylum, faceted by disease status
plot_bar(physeq, fill = "Phylum")+ facet_wrap("host_disease", scales="free_x") + theme(axis.text.x = element_blank())
# Relative abundance for Phylum
phylum.table <- physeq %>%
tax_glom(taxrank = "Phylum") %>% # agglomerate at phylum level
transform_sample_counts(function(x) {x/sum(x)} ) %>% # Transform to rel. abundance
psmelt() # Melt to long format
ggplot(phylum.table, aes(x = Sample, y = Abundance, fill = Phylum))+
facet_wrap(~ host_disease, scales = "free") + # scales = "free" removes empty lines
geom_bar(stat = "identity") +
theme(axis.text.x = element_text(size = 5, angle = -90))+
labs(x = "Samples", y = "Relative abundance")
# Same plots for the short-ASV (primer-cut) object: absolute abundance
plot_bar(physeq.small, fill = "Phylum")+ facet_wrap("host_disease", scales="free_x") + theme(axis.text.x = element_blank())
# Relative abundance for Phylum
phylum.table <- physeq.small %>%
tax_glom(taxrank = "Phylum") %>% # agglomerate at phylum level
transform_sample_counts(function(x) {x/sum(x)} ) %>% # Transform to rel. abundance
psmelt() # Melt to long format
ggplot(phylum.table, aes(x = Sample, y = Abundance, fill = Phylum))+
facet_wrap(~ host_disease, scales = "free") + # scales = "free" removes empty lines
geom_bar(stat = "identity") +
theme(axis.text.x = element_text(size = 5, angle = -90))+
labs(x = "Samples", y = "Relative abundance")
For any further analysis, we will be doing it on the physeq_shortASV object, as the ASVs sequences better reflect the V4 variable region.